From: Keir Fraser
Date: Mon, 20 Oct 2008 16:17:55 +0000 (+0100)
Subject: x86, spinlock: Get rid of .text.lock out-of-line section.
X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~14066^2~15
X-Git-Url: https://dgit.raspbian.org/%22http://www.example.com/cgi/%22/%22http:/www.example.com/cgi/%22?a=commitdiff_plain;h=c604d2c2e0cd96e9f64b05217397bb817cdaeb77;p=xen.git

x86, spinlock: Get rid of .text.lock out-of-line section.

We don't care about code bloat now that spinlock operations are not
inlined into callers. This will make backtraces easier to read.

Signed-off-by: Keir Fraser
---

diff --git a/xen/arch/x86/x86_32/xen.lds.S b/xen/arch/x86/x86_32/xen.lds.S
index 2c3d21fced..99fffca441 100644
--- a/xen/arch/x86/x86_32/xen.lds.S
+++ b/xen/arch/x86/x86_32/xen.lds.S
@@ -26,7 +26,6 @@ SECTIONS
        *(.fixup)
        *(.gnu.warning)
   } :text =0x9090
 
-  .text.lock : { *(.text.lock) } :text /* out-of-line lock text */
 
   _etext = .; /* End of text section */
diff --git a/xen/arch/x86/x86_64/xen.lds.S b/xen/arch/x86/x86_64/xen.lds.S
index 55559f4678..4f840107ce 100644
--- a/xen/arch/x86/x86_64/xen.lds.S
+++ b/xen/arch/x86/x86_64/xen.lds.S
@@ -24,7 +24,6 @@ SECTIONS
        *(.fixup)
        *(.gnu.warning)
   } :text = 0x9090
 
-  .text.lock : { *(.text.lock) } :text /* out-of-line lock text */
 
   _etext = .; /* End of text section */
diff --git a/xen/include/asm-x86/rwlock.h b/xen/include/asm-x86/rwlock.h
index e8c8846cb5..e4474d4a25 100644
--- a/xen/include/asm-x86/rwlock.h
+++ b/xen/include/asm-x86/rwlock.h
@@ -22,25 +22,19 @@
 
 #define __build_read_lock_ptr(rw, helper) \
     asm volatile(LOCK "subl $1,(%0)\n\t" \
-                 "js 2f\n" \
+                 "jns 1f\n\t" \
+                 "call " helper "\n\t" \
                  "1:\n" \
-                 ".section .text.lock,\"ax\"\n" \
-                 "2:\tcall " helper "\n\t" \
-                 "jmp 1b\n" \
-                 ".previous" \
                  ::"a" (rw) : "memory")
 
 #define __build_read_lock_const(rw, helper) \
     asm volatile(LOCK "subl $1,%0\n\t" \
-                 "js 2f\n" \
-                 "1:\n" \
-                 ".section .text.lock,\"ax\"\n" \
-                 "2:\tpush %%"__OP"ax\n\t" \
+                 "jns 1f\n\t" \
+                 "push %%"__OP"ax\n\t" \
                  "lea %0,%%"__OP"ax\n\t" \
                  "call " helper "\n\t" \
                  "pop %%"__OP"ax\n\t" \
-                 "jmp 1b\n" \
-                 ".previous" \
+                 "1:\n" \
                  :"=m" (*(volatile int *)rw) : : "memory")
 
 #define __build_read_lock(rw, helper) do { \
@@ -52,25 +46,19 @@
 
 #define __build_write_lock_ptr(rw, helper) \
     asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
-                 "jnz 2f\n" \
+                 "jz 1f\n\t" \
+                 "call " helper "\n\t" \
                  "1:\n" \
-                 ".section .text.lock,\"ax\"\n" \
-                 "2:\tcall " helper "\n\t" \
-                 "jmp 1b\n" \
-                 ".previous" \
                  ::"a" (rw) : "memory")
 
 #define __build_write_lock_const(rw, helper) \
     asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
-                 "jnz 2f\n" \
-                 "1:\n" \
-                 ".section .text.lock,\"ax\"\n" \
-                 "2:\tpush %%"__OP"ax\n\t" \
+                 "jz 1f\n\t" \
+                 "push %%"__OP"ax\n\t" \
                  "lea %0,%%"__OP"ax\n\t" \
                  "call " helper "\n\t" \
                  "pop %%"__OP"ax\n\t" \
-                 "jmp 1b\n" \
-                 ".previous" \
+                 "1:\n" \
                  :"=m" (*(volatile int *)rw) : : "memory")
 
 #define __build_write_lock(rw, helper) do { \
diff --git a/xen/include/asm-x86/spinlock.h b/xen/include/asm-x86/spinlock.h
index 7dc1da0bd8..f5503a2820 100644
--- a/xen/include/asm-x86/spinlock.h
+++ b/xen/include/asm-x86/spinlock.h
@@ -18,14 +18,13 @@ typedef struct {
 static inline void _raw_spin_lock(raw_spinlock_t *lock)
 {
     asm volatile (
-        "1: lock; decb %0 \n"
-        "   js 2f         \n"
-        ".section .text.lock,\"ax\"\n"
+        "1: lock; decw %0 \n"
+        "   jns 3f        \n"
         "2: rep; nop      \n"
-        "   cmpb $0,%0    \n"
+        "   cmpw $0,%0    \n"
         "   jle 2b        \n"
         "   jmp 1b        \n"
-        ".previous"
+        "3:"
         : "=m" (lock->lock) : : "memory" );
 }
 
@@ -33,16 +32,16 @@ static inline void _raw_spin_unlock(raw_spinlock_t *lock)
 {
     ASSERT(_raw_spin_is_locked(lock));
     asm volatile (
-        "movb $1,%0"
+        "movw $1,%0"
         : "=m" (lock->lock) : : "memory" );
 }
 
 static inline int _raw_spin_trylock(raw_spinlock_t *lock)
 {
-    char oldval;
+    s16 oldval;
     asm volatile (
-        "xchgb %b0,%1"
-        :"=q" (oldval), "=m" (lock->lock)
+        "xchgw %w0,%1"
+        :"=r" (oldval), "=m" (lock->lock)
         :"0" (0) : "memory" );
     return (oldval > 0);
 }
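
As an aside for readers who have not met the .text.lock trick before: the old
code pushed the contended slow path (the "2: rep; nop" wait loop, or the call
to the rwlock helper) into a separate .text.lock section via
".section .text.lock,\"ax\"" / ".previous", so it ended up far from its caller
and made backtraces harder to follow. The sketch below is not part of the
patch; it is a minimal, self-contained illustration of the inline layout the
patch switches to, lifted from the _raw_spin_lock hunk above. The demo_* names,
the plain C short in place of Xen's s16, and the single-threaded main() harness
are stand-ins added purely for illustration; it builds with GCC on x86.

/*
 * Illustrative sketch only -- not part of the patch.  Shows the spin-lock
 * fast path with its slow path kept inline, as in the new _raw_spin_lock
 * above.  demo_* names and main() are stand-ins; only the asm bodies are
 * taken from the patch.
 */
#include <stdio.h>

typedef struct {
    volatile short lock;        /* 1 == free, <= 0 == held */
} demo_spinlock_t;

static inline void demo_spin_lock(demo_spinlock_t *lock)
{
    asm volatile (
        "1: lock; decw %0 \n"   /* atomically decrement the lock word */
        "   jns 3f        \n"   /* result >= 0: we own the lock */
        "2: rep; nop      \n"   /* contended: pause while it looks held */
        "   cmpw $0,%0    \n"
        "   jle 2b        \n"
        "   jmp 1b        \n"   /* looks free again: retry the decrement */
        "3:"                    /* slow path stays inline, no .text.lock */
        : "=m" (lock->lock) : : "memory" );
}

static inline void demo_spin_unlock(demo_spinlock_t *lock)
{
    asm volatile ( "movw $1,%0" : "=m" (lock->lock) : : "memory" );
}

int main(void)
{
    demo_spinlock_t l = { 1 };
    demo_spin_lock(&l);
    printf("held: lock word = %d\n", l.lock);    /* prints 0 */
    demo_spin_unlock(&l);
    printf("free: lock word = %d\n", l.lock);    /* prints 1 */
    return 0;
}

With the whole wait loop emitted in line, a sample of a spinning CPU now lands
inside the locking function itself rather than in an anonymous .text.lock
fragment, which is the "backtraces easier to read" point of the commit message.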